dma_addr_t *dma_handle, int gfp)
{
void *ret;
+ unsigned int order = get_order(size);
+ unsigned long vstart;
+
/* ignore region specifiers */
gfp &= ~(__GFP_DMA | __GFP_HIGHMEM);
if (dev == NULL || (dev->coherent_dma_mask < 0xffffffff))
gfp |= GFP_DMA;
- ret = (void *)__get_free_pages(gfp, get_order(size));
+ vstart = __get_free_pages(gfp, order);
+ ret = (void *)vstart;
+ if (ret == NULL)
+ return ret;
- if (ret != NULL) {
- memset(ret, 0, size);
- *dma_handle = virt_to_bus(ret);
+ /*
+ * __get_free_pages() guarantees contiguity only in the kernel's
+ * pseudo-physical space; DMA needs the extent to be contiguous in
+ * machine memory, so swap the pages for a machine-contiguous extent.
+ * This code could be cleaned up some, and the number of
+ * hypercalls reduced.
+ */
+ if (size > PAGE_SIZE) {
+ pgd_t *pgd;
+ pmd_t *pmd;
+ pte_t *pte;
+ unsigned long pfn, i;
+ /* 1. Zap current PTEs, giving away the underlying pages. */
+ for (i = 0; i < (1<<order); i++) {
+ pgd = pgd_offset_k(vstart + (i*PAGE_SIZE));
+ pmd = pmd_offset(pgd, vstart + (i*PAGE_SIZE));
+ pte = pte_offset_kernel(pmd, vstart + (i*PAGE_SIZE));
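+ /* Xen: kernel PTEs hold machine frame numbers, so 'pfn' is really an MFN. */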
+ pfn = pte->pte_low >> PAGE_SHIFT;
+ queue_l1_entry_update(pte, 0);
+ flush_page_update_queue();
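+ /* The zap must reach the hypervisor before the frame is relinquished. */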
+ if (HYPERVISOR_dom_mem_op(MEMOP_decrease_reservation,
+ &pfn, 1, 0) != 1) BUG();
+ }
+ /* 2. Get a new contiguous memory extent. */
+ if (HYPERVISOR_dom_mem_op(MEMOP_increase_reservation,
+ &pfn, 1, order) != 1) BUG();
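+ /* pfn now holds the first machine frame of the new contiguous extent. */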
+ /* 3. Map the new extent in place of old pages. */
+ for (i = 0; i < (1<<order); i++) {
+ pgd = pgd_offset_k(vstart + (i*PAGE_SIZE));
+ pmd = pmd_offset(pgd, vstart + (i*PAGE_SIZE));
+ pte = pte_offset_kernel(pmd, vstart + (i*PAGE_SIZE));
+ queue_l1_entry_update(
+ pte, ((pfn+i)<<PAGE_SHIFT)|__PAGE_KERNEL);
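+ /* Keep the hypervisor's M2P table and our own P2M array in sync. */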
+ queue_machphys_update(
+ pfn+i, (__pa(ret)>>PAGE_SHIFT)+i);
+ phys_to_machine_mapping[(__pa(ret)>>PAGE_SHIFT)+i] =
+ pfn+i;
+ }
+ flush_page_update_queue();
}
+
+ memset(ret, 0, size);
+ *dma_handle = virt_to_bus(ret);
+
return ret;
}